struct domain *d, unsigned int isa_irq)
{
struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
- unsigned int gsi = (isa_irq == 0) ? 2 : isa_irq;
+ unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);
ASSERT(isa_irq <= 15);
struct domain *d, unsigned int isa_irq)
{
struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
- unsigned int gsi = (isa_irq == 0) ? 2 : isa_irq;
+ unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);
ASSERT(isa_irq <= 15);
return -1;
}
-int get_intr_vector(struct vcpu* v, int irq, int type)
+int get_isa_irq_vector(struct vcpu *v, int isa_irq, int type)
{
+ unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);
+
if ( type == APIC_DM_EXTINT )
- return v->domain->arch.hvm_domain.irq.vpic[irq >> 3].irq_base
- + (irq & 0x7);
+ return (v->domain->arch.hvm_domain.irq.vpic[isa_irq >> 3].irq_base
+ + (isa_irq & 7));
- return domain_vioapic(v->domain)->redirtbl[irq].fields.vector;
+ return domain_vioapic(v->domain)->redirtbl[gsi].fields.vector;
}
-int is_irq_masked(struct vcpu *v, int irq)
+int is_isa_irq_masked(struct vcpu *v, int isa_irq)
{
- if ( is_lvtt(v, irq) )
- return !is_lvtt_enabled(v);
+ unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);
- if ( v->domain->arch.hvm_domain.irq.vpic[irq >> 3].imr & (1 << (irq & 7))
- && domain_vioapic(v->domain)->redirtbl[irq].fields.mask )
- return 1;
+ if ( is_lvtt(v, isa_irq) )
+ return !is_lvtt_enabled(v);
- return 0;
+ return ((v->domain->arch.hvm_domain.irq.vpic[isa_irq >> 3].imr &
+ (1 << (isa_irq & 7))) &&
+ domain_vioapic(v->domain)->redirtbl[gsi].fields.mask);
}
{
#ifdef IRQ0_SPECIAL_ROUTING
/* Force round-robin to pick VCPU 0 */
- if ( irq == 0 )
+ if ( irq == hvm_isa_irq_to_gsi(0) )
{
v = vioapic_domain(vioapic)->vcpu[0];
target = v ? vcpu_vlapic(v) : NULL;
deliver_bitmask &= ~(1 << bit);
#ifdef IRQ0_SPECIAL_ROUTING
/* Do not deliver timer interrupts to VCPU != 0 */
- if ( (irq == 0) && (bit != 0) )
+ if ( irq == hvm_isa_irq_to_gsi(0) )
v = vioapic_domain(vioapic)->vcpu[0];
else
#endif
list_for_each( list, head )
{
pt = list_entry(list, struct periodic_time, list);
- if ( !is_irq_masked(v, pt->irq) && pt->pending_intr_nr
- && pt->last_plt_gtime + pt->period < max_lag )
+ if ( !is_isa_irq_masked(v, pt->irq) && pt->pending_intr_nr &&
+ ((pt->last_plt_gtime + pt->period_cycles) < max_lag) )
{
- max_lag = pt->last_plt_gtime + pt->period;
+ max_lag = pt->last_plt_gtime + pt->period_cycles;
irq = pt->irq;
}
}
if ( is_lvtt(v, irq) )
+ {
vlapic_set_irq(vcpu_vlapic(v), irq, 0);
+ }
else if ( irq >= 0 )
{
hvm_isa_irq_deassert(v->domain, irq);
if ( is_lvtt(v, pt->irq) )
{
- if (pt->irq == vector)
- return pt;
- else
+ if ( pt->irq != vector )
continue;
+ return pt;
}
- vec = get_intr_vector(v, pt->irq, type);
+ vec = get_isa_irq_vector(v, pt->irq, type);
/* RTC irq need special care */
- if ( vector != vec || (pt->irq == 8 && !is_rtc_periodic_irq(rtc)) )
+ if ( (vector != vec) || (pt->irq == 8 && !is_rtc_periodic_irq(rtc)) )
continue;
return pt;
{
struct periodic_time *pt = is_pt_irq(v, vector, type);
- if (pt == NULL)
+ if ( pt == NULL )
return;
pt->pending_intr_nr--;
pt->last_plt_gtime += pt->period_cycles;
hvm_set_guest_time(pt->vcpu, pt->last_plt_gtime);
- if (pt->cb)
+ if ( pt->cb != NULL )
pt->cb(pt->vcpu, pt->priv);
}
destroy_periodic_time(pt);
pt->enabled = 1;
- if (period < 900000) /* < 0.9 ms */
+ if ( period < 900000 ) /* < 0.9 ms */
{
- printk("HVM_PlatformTime: program too small period %"PRIu64"\n", period);
+ gdprintk(XENLOG_WARNING,
+ "HVM_PlatformTime: program too small period %"PRIu64"\n",
+ period);
period = 900000; /* force to 0.9ms */
}
pt->period = period;
/*
* Number of wires asserting each GSI.
*
- * GSIs 0-15 are the ISA IRQs. ISA devices map directly into this space.
+ * GSIs 0-15 are the ISA IRQs. ISA devices map directly into this space
+ * except ISA IRQ 0, which is connected to GSI 2.
* PCI links map into this space via the PCI-ISA bridge.
*
 * GSIs 16+ are used only by PCI devices. The mapping from PCI device to
#define hvm_pci_intx_link(dev, intx) \
(((dev) + (intx)) & 3)
+#define hvm_isa_irq_to_gsi(isa_irq) ((isa_irq) ? : 2)
+
/* Modify state of a PCI INTx wire. */
void hvm_pci_intx_assert(
struct domain *d, unsigned int device, unsigned int intx);
int cpu_get_interrupt(struct vcpu *v, int *type);
int cpu_has_pending_irq(struct vcpu *v);
-int get_intr_vector(struct vcpu* vcpu, int irq, int type);
-int is_irq_masked(struct vcpu *v, int irq);
+int get_isa_irq_vector(struct vcpu *vcpu, int irq, int type);
+int is_isa_irq_masked(struct vcpu *v, int isa_irq);
#endif /* __ASM_X86_HVM_IRQ_H__ */